/* Sync up with shared indexes. */
- FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
- BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);
ctrl_if_evtchn = xen_start_info->domain_controller_evtchn;
ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
for ( i = 0; i < 256; i++ )
ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
- FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
- BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);
mtx_init(&ctrl_if_lock, "ctrlif", NULL, MTX_SPIN | MTX_NOWITNESS);
static blkif_front_ring_t blk_ring;
+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
+
static unsigned long rec_ring_free;
-blkif_request_t rec_ring[RING_SIZE(&blk_ring)]; /* shadow recovery ring */
+blkif_request_t rec_ring[BLK_RING_SIZE];
/* XXX move to xb_vbd.c when VBD update support is added */
#define MAX_VBDS 64
{
unsigned long free = rec_ring_free;
- KASSERT(free <= RING_SIZE(&blk_ring), ("free %lu > RING_SIZE", free));
+ KASSERT(free <= BLK_RING_SIZE, ("free %lu > RING_SIZE", free));
rec_ring_free = rec_ring[free].id;
if (blk_ring.sring) free(blk_ring.sring, M_DEVBUF);
blk_ring.sring = (blkif_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
SHARED_RING_INIT(blk_ring.sring);
- FRONT_RING_INIT(&blk_ring, blk_ring.sring);
+ FRONT_RING_INIT(&blk_ring, blk_ring.sring, PAGE_SIZE);
blkif_state = BLKIF_STATE_DISCONNECTED;
blkif_send_interface_connect();
}
* This will need to be fixed once we have barriers */
/* Stage 1 : Find active and move to safety. */
- for ( i = 0; i < RING_SIZE(&blk_ring); i++ ) {
+ for ( i = 0; i < BLK_RING_SIZE; i++ ) {
if ( rec_ring[i].id >= KERNBASE ) {
req = RING_GET_REQUEST(&blk_ring,
blk_ring.req_prod_pvt);
}
/* Stage 3 : Set up free list. */
- for ( ; i < RING_SIZE(&blk_ring); i++ ){
+ for ( ; i < BLK_RING_SIZE; i++ ){
rec_ring[i].id = i+1;
}
rec_ring_free = blk_ring.req_prod_pvt;
- rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
+ rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
/* blk_ring.req_prod will be set when we flush_requests().*/
wmb();
printk("[XEN] Initialising virtual block device driver\n");
rec_ring_free = 0;
- for (i = 0; i < RING_SIZE(&blk_ring); i++) {
+ for (i = 0; i < BLK_RING_SIZE; i++) {
rec_ring[i].id = i+1;
}
- rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
+ rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
(void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx, 0);
}
/* Sync up with shared indexes. */
- FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
- BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);
ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
for ( i = 0; i < 256; i++ )
ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
- FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
- BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);
spin_lock_init(&ctrl_if_lock);
}
sring = (blkif_sring_t *)vma->addr;
SHARED_RING_INIT(sring);
- BACK_RING_INIT(&blkif->blk_ring, sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
blkif->evtchn = evtchn;
blkif->irq = bind_evtchn_to_irq(evtchn);
static blkif_front_ring_t blk_ring;
+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
+
unsigned long rec_ring_free;
-blkif_request_t rec_ring[RING_SIZE(&blk_ring)];
+blkif_request_t rec_ring[BLK_RING_SIZE];
static int recovery = 0; /* "Recovery in progress" flag. Protected
* by the blkif_io_lock */
{
unsigned long free = rec_ring_free;
- if ( free > RING_SIZE(&blk_ring) )
- BUG();
+ BUG_ON(free > BLK_RING_SIZE);
rec_ring_free = rec_ring[free].id;
{
/* We kick pending request queues if the ring is reasonably empty. */
if ( (nr_pending != 0) &&
- (RING_PENDING_REQUESTS(&blk_ring) <
- (RING_SIZE(&blk_ring) >> 1)) )
+ (RING_PENDING_REQUESTS(&blk_ring) < (BLK_RING_SIZE >> 1)) )
{
/* Attempt to drain the queue, but bail if the ring becomes full. */
while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&blk_ring, sring);
+ FRONT_RING_INIT(&blk_ring, sring, PAGE_SIZE);
blkif_state = BLKIF_STATE_DISCONNECTED;
blkif_send_interface_connect();
}
* This will need to be fixed once we have barriers */
/* Stage 1 : Find active and move to safety. */
- for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
+ for ( i = 0; i < BLK_RING_SIZE; i++ )
{
if ( rec_ring[i].id >= PAGE_OFFSET )
{
}
/* Stage 3 : Set up free list. */
- for ( ; i < RING_SIZE(&blk_ring); i++ )
+ for ( ; i < BLK_RING_SIZE; i++ )
rec_ring[i].id = i+1;
rec_ring_free = blk_ring.req_prod_pvt;
- rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
+ rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
/* blk_ring->req_prod will be set when we flush_requests().*/
wmb();
printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");
rec_ring_free = 0;
- for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
+ for ( i = 0; i < BLK_RING_SIZE; i++ )
rec_ring[i].id = i+1;
- rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
+ rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
(void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
CALLBACK_IN_BLOCKING_CONTEXT);
sring = (blkif_sring_t *)vma->addr;
SHARED_RING_INIT(sring);
- BACK_RING_INIT(&blkif->blk_ring, sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
blkif->evtchn = evtchn;
blkif->irq = bind_evtchn_to_irq(evtchn);
sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&blktap_be_ring, sring);
+ FRONT_RING_INIT(&blktap_be_ring, sring, PAGE_SIZE);
blktap_be_state = BLKIF_STATE_DISCONNECTED;
DPRINTK("Blkif-Passthrough-BE is now DISCONNECTED.\n");
blkif_ptbe_send_interface_connect();
SetPageReserved(virt_to_page(csring));
SHARED_RING_INIT(csring);
- FRONT_RING_INIT(&blktap_uctrl_ring, csring);
-
+ FRONT_RING_INIT(&blktap_uctrl_ring, csring, PAGE_SIZE);
/* Allocate the fe ring. */
sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
SetPageReserved(virt_to_page(sring));
SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&blktap_ufe_ring, sring);
+ FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
/* Allocate the be ring. */
sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
SetPageReserved(virt_to_page(sring));
SHARED_RING_INIT(sring);
- BACK_RING_INIT(&blktap_ube_ring, sring);
+ BACK_RING_INIT(&blktap_ube_ring, sring, PAGE_SIZE);
DPRINTK(KERN_ALERT "blktap open.\n");
sring = (usbif_sring_t *)vma->addr;
SHARED_RING_INIT(sring);
- BACK_RING_INIT(&up->usb_ring, sring);
+ BACK_RING_INIT(&up->usb_ring, sring, PAGE_SIZE);
up->evtchn = evtchn;
up->irq = bind_evtchn_to_irq(evtchn);
/* Move from CLOSED to DISCONNECTED state. */
sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&xhci->usb_ring, sring);
+ FRONT_RING_INIT(&xhci->usb_ring, sring, PAGE_SIZE);
xhci->state = USBIF_STATE_DISCONNECTED;
/* Construct an interface-CONNECT message for the domain controller. */
/* assign the rings to the mapped memory */
csring = (ctrl_sring_t *)blktap_mem;
- BACK_RING_INIT(&ctrl_ring, csring);
+ BACK_RING_INIT(&ctrl_ring, csring, CONTROL_RING_MEM);
sring = (blkif_sring_t *)((unsigned long)blktap_mem + PAGE_SIZE);
- FRONT_RING_INIT(&be_ring, sring);
+ FRONT_RING_INIT(&be_ring, sring, PAGE_SIZE);
sring = (blkif_sring_t *)((unsigned long)blktap_mem + (2 *PAGE_SIZE));
- BACK_RING_INIT(&fe_ring, sring);
+ BACK_RING_INIT(&fe_ring, sring, PAGE_SIZE);
mmap_vstart = (unsigned long)blktap_mem + (BLKTAP_RING_PAGES << PAGE_SHIFT);
}
/* Synchronise ring indexes. */
- BACK_RING_ATTACH(&cc->tx_ring, &cc->interface->tx_ring);
- FRONT_RING_ATTACH(&cc->rx_ring, &cc->interface->rx_ring);
+ BACK_RING_ATTACH(&cc->tx_ring, &cc->interface->tx_ring, CONTROL_RING_MEM);
+ FRONT_RING_ATTACH(&cc->rx_ring, &cc->interface->rx_ring, CONTROL_RING_MEM);
cc->connected = 1;
* Generate blkif ring structures and types.
*/
-DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t, PAGE_SIZE);
+DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t);
/*
* BLKIF_OP_PROBE:
* two 32-bit counters: (64 * 8) + (2 * 4) = 520
*/
#define CONTROL_RING_MEM 520
-DEFINE_RING_TYPES(ctrl, control_msg_t, control_msg_t, CONTROL_RING_MEM);
+DEFINE_RING_TYPES(ctrl, control_msg_t, control_msg_t);
typedef struct {
- ctrl_sring_t tx_ring; /* 0: guest -> controller */
- ctrl_sring_t rx_ring; /* 520: controller -> guest */
+ union {
+ ctrl_sring_t tx_ring; /* 0: guest -> controller */
+ char __x[CONTROL_RING_MEM];
+ } PACKED;
+ union {
+ ctrl_sring_t rx_ring; /* 520: controller -> guest */
+ char __y[CONTROL_RING_MEM];
+ } PACKED;
} PACKED control_if_t; /* 1040 bytes */
/*
/*
* Calculate size of a shared ring, given the total available space for the
* ring and indexes (_sz), and the name tag of the request/response structure.
- * S ring contains as many entries as will fit, rounded down to the nearest
+ * A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
-#define __RING_SIZE(_name, _sz) \
- (__RD32(((_sz) - 2*sizeof(RING_IDX)) / sizeof(union _name##_sring_entry)))
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - 2*sizeof(RING_IDX)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
* To make a new ring datatype, you need to have two message structures,
- * let's say request_t, and response_t already defined. You also need to
- * know how big the shared memory region you want the ring to occupy is
- * (PAGE_SIZE, of instance).
+ * let's say request_t, and response_t already defined.
*
* In a header where you want the ring datatype declared, you then do:
*
- * DEFINE_RING_TYPES(mytag, request_t, response_t, PAGE_SIZE);
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
* mytag_front_ring_t - The 'front' half of the ring.
* mytag_back_ring_t - The 'back' half of the ring.
*
- * To initialize a ring in your code, on the front half, you do:
+ * To initialize a ring in your code you need to know the location and size
+ * of the shared memory area (PAGE_SIZE, for instance). To initialize
+ * the front half:
*
* mytag_front_ring_t front_ring;
*
* SHARED_RING_INIT((mytag_sring_t *)shared_page);
- * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page);
+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly...
*/
-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t, __size) \
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
__req_t req; \
__rsp_t rsp; \
-} PACKED; \
+}; \
\
/* Shared ring page */ \
struct __name##_sring { \
RING_IDX req_prod; \
RING_IDX rsp_prod; \
- union __name##_sring_entry ring[__RING_SIZE(__name, __size)]; \
-} PACKED; \
+ union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
RING_IDX req_prod_pvt; \
RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
struct __name##_sring *sring; \
}; \
\
struct __name##_back_ring { \
RING_IDX rsp_prod_pvt; \
RING_IDX req_cons; \
+ unsigned int nr_ents; \
struct __name##_sring *sring; \
}; \
\
* outstanding requests.
*/
-
/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = 0; \
(_s)->rsp_prod = 0; \
} while(0)
-#define FRONT_RING_INIT(_r, _s) do { \
+#define FRONT_RING_INIT(_r, _s, __size) do { \
(_r)->req_prod_pvt = 0; \
(_r)->rsp_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
-#define BACK_RING_INIT(_r, _s) do { \
+#define BACK_RING_INIT(_r, _s, __size) do { \
(_r)->rsp_prod_pvt = 0; \
(_r)->req_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
/* Initialize to existing shared indexes -- for recovery */
-#define FRONT_RING_ATTACH(_r, _s) do { \
+#define FRONT_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
(_r)->req_prod_pvt = (_s)->req_prod; \
(_r)->rsp_cons = (_s)->rsp_prod; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
-#define BACK_RING_ATTACH(_r, _s) do { \
+#define BACK_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
(_r)->rsp_prod_pvt = (_s)->rsp_prod; \
(_r)->req_cons = (_s)->req_prod; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
/* How big is this ring? */
#define RING_SIZE(_r) \
- (sizeof((_r)->sring->ring)/sizeof((_r)->sring->ring[0]))
+ ((_r)->nr_ents)
/* How many empty slots are on a ring? */
#define RING_PENDING_REQUESTS(_r) \
#define USBIF_OP_RESET 2 /* Reset a virtual USB port. */
typedef struct {
- unsigned long id; /* 0: private guest value, echoed in resp */
- u8 operation; /* 4: USBIF_OP_??? */
+ unsigned long id; /* private guest value, echoed in resp */
+ u8 operation; /* USBIF_OP_??? */
u8 __pad1;
- usbif_vdev_t port; /* 6 : guest virtual USB port */
- unsigned long devnum :7; /* 8 : Device address, as seen by the guest.*/
+ usbif_vdev_t port; /* guest virtual USB port */
+ unsigned long devnum :7; /* Device address, as seen by the guest.*/
unsigned long endpoint :4; /* Device endpoint. */
unsigned long direction :1; /* Pipe direction. */
unsigned long speed :1; /* Pipe speed. */
unsigned long pipe_type :2; /* Pipe type (iso, bulk, int, ctrl) */
unsigned long __pad2 :18;
- unsigned long transfer_buffer; /* 12: Machine address */
- unsigned long length; /* 16: Buffer length */
- unsigned long transfer_flags; /* 20: For now just pass Linux transfer
+ unsigned long transfer_buffer; /* Machine address */
+ unsigned long length; /* Buffer length */
+ unsigned long transfer_flags; /* For now just pass Linux transfer
* flags - this may change. */
- unsigned char setup[8]; /* 22 Embed setup packets directly. */
- unsigned long iso_schedule; /* 30 Machine address of transfer sched (iso
+ unsigned char setup[8]; /* Embed setup packets directly. */
+ unsigned long iso_schedule; /* Machine address of transfer sched (iso
* only) */
- unsigned long num_iso; /* 34 : length of iso schedule */
- unsigned long timeout; /* 38: timeout in ms */
-} PACKED usbif_request_t; /* 42 */
+ unsigned long num_iso; /* length of iso schedule */
+ unsigned long timeout; /* timeout in ms */
+} usbif_request_t;
/* Data we need to pass:
* - Transparently handle short packets or complain at us?
*/
typedef struct {
- unsigned long id; /* 0: copied from request */
- u8 operation; /* 4: copied from request */
- u8 data; /* 5: Small chunk of in-band data */
- s16 status; /* 6: USBIF_RSP_??? */
+ unsigned long id; /* copied from request */
+ u8 operation; /* copied from request */
+ u8 data; /* Small chunk of in-band data */
+ s16 status; /* USBIF_RSP_??? */
unsigned long transfer_mutex; /* Used for cancelling requests atomically. */
- unsigned long length; /* 8: How much data we really got */
-} PACKED usbif_response_t;
+ unsigned long length; /* How much data we really got */
+} usbif_response_t;
#define USBIF_RSP_ERROR -1 /* non-specific 'error' */
#define USBIF_RSP_OKAY 0 /* non-specific 'okay' */
-DEFINE_RING_TYPES(usbif, usbif_request_t, usbif_response_t, PAGE_SIZE);
+DEFINE_RING_TYPES(usbif, usbif_request_t, usbif_response_t);
typedef struct {
unsigned long length; /* IN = expected, OUT = actual */